In [1]:
%pylab
import numpy as np
import pylab as plb
import tifffile
Using matplotlib backend: WXAgg
Populating the interactive namespace from numpy and matplotlib

In [2]:
#these functions are used for the image overlays.
def overlay_hue_saturation(overlay,bkgnd,
                       gama = 1.0,
                       color_map = plb.cm.jet,
                       alpha = 0.5):
    """Blend the hue and saturation of an overlay image into a background.

    Parameters
    ----------
    overlay : 2-D scalar image (mapped through `color_map`) or an RGB image.
    bkgnd : 2-D greyscale image (replicated to 3 channels) or an RGB image.
    gama : exponent applied to a scalar overlay before colormapping.
    color_map : matplotlib colormap used for scalar overlays.
    alpha : weight of the overlay's hue/saturation contribution.

    Returns
    -------
    RGB float image: the background with the overlay's hue and saturation
    added wherever the overlay is non-zero.
    """
    from skimage import data, color, io, img_as_float
    gama_correct = lambda x: x**gama
    # Promote a greyscale background to three identical RGB channels.
    if len(np.shape(bkgnd)) == 2:
        img_color = np.dstack((bkgnd, bkgnd, bkgnd))
    else:
        img_color = bkgnd

    if len(np.shape(overlay)) == 2:
        # Scalar overlay: gamma-correct, colormap, drop the alpha channel.
        overlay_color = color_map(gama_correct(overlay))[:,:,:3]
        mask = overlay>0
    else:
        overlay_color = overlay
        # Sum channels in uint64 so the mask test cannot overflow.
        # (Was bare `uint64` leaked into the namespace by %pylab; use the
        # explicit numpy name for consistency with the rest of the file.)
        mask = np.sum(overlay.astype(np.uint64),axis = 2)>0
    img_hsv = color.rgb2hsv(img_color)
    overlay_hsv = color.rgb2hsv(overlay_color)
    # Add (not replace) hue and saturation inside the masked region.
    img_hsv[..., 0] += mask*overlay_hsv[..., 0] * alpha
    img_hsv[..., 1] += mask*overlay_hsv[..., 1] * alpha
    img_overlay = color.hsv2rgb(img_hsv)
    return(img_overlay)

def colorized_image(input_img,
                    map_point = 0.5,
                    color_map = plb.cm.jet,
                    alpha = 0.2,
                    gama = 1.0,
                    gain = 1.0):
    """Render a greyscale muscle image as a tinted RGB image.

    Every non-zero pixel is tinted with the colormap color at `map_point`;
    brightness comes from the (gain- and gamma-adjusted) input intensity.

    Parameters
    ----------
    input_img : 2-D greyscale image.
    map_point : position in [0, 1] on `color_map` used for the tint.
    color_map : matplotlib colormap supplying the tint color.
    alpha : weight of the hue/saturation contribution.
    gama, gain : brightness adjustments applied before clipping to 8 bits.
    """
    from skimage import data, color, io, img_as_float
    gama_correct = lambda x: x**gama
    # Gain + gamma adjust, then clip into the 8-bit range.
    img = gain*input_img
    img = gama_correct(img)
    img[img>255] = 255
    # Explicit np.uint8 (was bare `uint8` from the %pylab namespace).
    img = img.astype(np.uint8)
    bg_color = np.dstack((img, img, img))

    mask = img>0
    # Constant colormap coordinate for every non-zero pixel.
    map_img = mask*map_point

    overlay_color = color_map(map_img)[:,:,:3]
    img_hsv = color.rgb2hsv(bg_color)

    overlay_hsv = color.rgb2hsv(overlay_color)
    #make hue and saturation of img map to color
    img_hsv[..., 0] += mask*overlay_hsv[..., 0] * alpha
    img_hsv[..., 1] += mask*overlay_hsv[..., 1] * alpha
    img_overlay = color.hsv2rgb(img_hsv)
    return(img_overlay)

def mask_stack(top_img,bottom_img,alpha = 1.0,gain = 8):
    """Composite `top_img` over `bottom_img`, using the brightness of the
    top image (its HSV value channel) as a per-pixel opacity mask.

    `gain` boosts the mask before clipping, so moderately bright top-layer
    pixels become fully opaque.  (`alpha` is accepted but currently unused.)
    """
    from skimage import data, color, io, img_as_float
    # Brightness of the top layer, normalised to [0, 1].
    brightness = color.rgb2hsv(top_img)[..., 2]
    weight = brightness / np.max(brightness)
    # Boost and clip: weights above 1 saturate to fully opaque.
    weight = weight * gain
    weight[weight > 1] = 1
    weight_rgb = np.dstack((weight, weight, weight))
    # Linear blend: bright top pixels win, elsewhere the bottom shows through.
    return top_img * weight_rgb + bottom_img * (1 - weight_rgb)
In [3]:
#Load the registered confocal data: the brightfield cuticle image plus one
#registered confocal stack per muscle (tifffile returns numpy arrays).
bkgnd_img = tifffile.TiffFile('./registration/65G06_brightfield_cuticle.tif').asarray()
b1_stack = tifffile.TiffFile('./registration/combined_stack_b1.tiff').asarray()
b2_stack = tifffile.TiffFile('./registration/combined_stack_b2.tiff').asarray()
b3_stack = tifffile.TiffFile('./registration/combined_stack_b3.tiff').asarray()

i1_stack = tifffile.TiffFile('./registration/combined_stack_i1.tiff').asarray()
i2_stack = tifffile.TiffFile('./registration/combined_stack_i2.tiff').asarray()

iii1_stack = tifffile.TiffFile('./registration/combined_stack_iii1.tiff').asarray()
iii3_stack = tifffile.TiffFile('./registration/combined_stack_iii3.tiff').asarray()
iii24_stack = tifffile.TiffFile('./registration/combined_stack_iii24.tiff').asarray()

hg1_stack = tifffile.TiffFile('./registration/combined_stack_hg1.tiff').asarray()
hg2_stack = tifffile.TiffFile('./registration/combined_stack_hg2.tiff').asarray()
hg3_stack = tifffile.TiffFile('./registration/combined_stack_hg3.tiff').asarray()
hg4_stack = tifffile.TiffFile('./registration/combined_stack_hg4.tiff').asarray()
#Collapse each z-stack to a single 2-D image by max projection along axis 0.
b1_img = np.max(b1_stack,axis = 0)
b2_img = np.max(b2_stack,axis = 0)
b3_img = np.max(b3_stack,axis = 0)

i1_img = np.max(i1_stack,axis = 0)
i2_img = np.max(i2_stack,axis = 0)

iii1_img = np.max(iii1_stack,axis = 0)
iii3_img = np.max(iii3_stack,axis = 0)
iii24_img = np.max(iii24_stack,axis = 0)

hg1_img = np.max(hg1_stack,axis = 0)
hg2_img = np.max(hg2_stack,axis = 0)
hg3_img = np.max(hg3_stack,axis = 0)
hg4_img = np.max(hg4_stack,axis = 0)
In [4]:
#Colorize each muscle's max projection with a distinct colormap position,
#then composite them all over the brightfield cuticle image.
b_map = plb.cm.nipy_spectral
i_map = plb.cm.nipy_spectral
iii_map = plb.cm.nipy_spectral
# Colormap coordinates in [0, 1]: muscles in the same group (b, i, iii, hg)
# get nearby hues; different groups are well separated.
cvls = {'b1':0.1,'b2':0.13,'b3':0.16,
        'i1':0.3,'i2':0.33,
        'iii1':0.6,'iii3':0.63,'iii24':0.66,
        'hg1':0.80,'hg2':0.83,'hg3':0.86,'hg4':0.89}

# Per-muscle gamma/gain overrides compensate for dimmer stacks (b3, i2, hg1).
b1_colorized = colorized_image(b1_img,color_map = b_map,map_point = cvls['b1'],alpha = 1.0)
b2_colorized = colorized_image(b2_img,color_map = b_map,map_point = cvls['b2'],alpha = 1.0)
b3_colorized = colorized_image(b3_img,color_map = b_map,map_point = cvls['b3'],alpha = 1.0,gama = 0.65,gain = 40.0)

i1_colorized = colorized_image(i1_img,color_map = i_map,map_point = cvls['i1'],alpha = 1.0)
i2_colorized = colorized_image(i2_img,color_map = i_map,map_point = cvls['i2'],alpha = 1.0,gama = 0.7,gain = 10.0)

iii1_colorized = colorized_image(iii1_img,color_map = iii_map,map_point = cvls['iii1'],alpha = 1.0)
iii3_colorized = colorized_image(iii3_img,color_map = iii_map,map_point = cvls['iii3'],alpha = 1.0)
iii24_colorized = colorized_image(iii24_img,color_map = iii_map,map_point = cvls['iii24'],alpha = 1.0)

hg1_colorized = colorized_image(hg1_img,color_map = iii_map,map_point = cvls['hg1'],alpha = 1.0,gain = 2.0)
hg2_colorized = colorized_image(hg2_img,color_map = iii_map,map_point = cvls['hg2'],alpha = 1.0)
hg3_colorized = colorized_image(hg3_img,color_map = iii_map,map_point = cvls['hg3'],alpha = 1.0)
hg4_colorized = colorized_image(hg4_img,color_map = iii_map,map_point = cvls['hg4'],alpha = 1.0)

#combine the images using addition, scaled from [0, 1] floats to 8-bit range
img_sum = (b1_colorized+
           b2_colorized+
           b3_colorized+
           i1_colorized+
           i2_colorized+
           iii1_colorized+
           iii3_colorized+
           iii24_colorized+
           hg1_colorized+
           hg2_colorized+
           hg3_colorized+
           hg4_colorized)*255

# Clip and convert (bare `uint8` comes from the %pylab namespace).
img_sum[img_sum>255] = 255
img_sum = img_sum.astype(uint8)
imshow(overlay_hue_saturation(img_sum[:,:,:3],bkgnd_img,alpha = 1.0))
display(gcf());close()

I iteratively apply the function 'mask_stack' (via `reduce`) to a list of colorized muscle stacks to create a visualization of the muscle population. The order of muscles in the list is important since this determines the way the muscles overlap in the final image. The colorized images of the muscles are then mixed into the greyscale image of the cuticle.

In [5]:
# Fold mask_stack over the colorized muscles: later list entries are drawn
# underneath earlier ones, so list order controls occlusion.
# NOTE(review): `reduce` is a builtin here (Python 2); under Python 3 this
# needs `from functools import reduce`.
stacked_muscles = reduce(mask_stack, 
                          [b2_colorized,
                           i1_colorized,
                           hg4_colorized,
                           iii24_colorized,
                           i2_colorized,
                           hg2_colorized,
                           hg3_colorized,
                           iii3_colorized,
                           iii1_colorized,
                           b1_colorized,
                           b3_colorized,
                           hg1_colorized])
# Gamma-brighten the cuticle, add a pedestal (+1000), and scale down (*0.05)
# so it reads as a dim grey backdrop under the muscles.
gama = 1.4
gama_correct = lambda x: x**gama
cut_gamma = gama_correct(bkgnd_img)+1000
cuticle = (np.dstack((cut_gamma,cut_gamma,cut_gamma))*0.05).astype(uint8)
muscles = (stacked_muscles*0.9*255)
sumimg = cuticle+muscles
sumimg[sumimg>255] = 255
sumimg = sumimg.astype(uint8)
imshow(sumimg)
display(gcf());close()
In [6]:
tifffile.imsave('stacked_muscles.tiff',uint8(stacked_muscles*255))

To define the regions of interest I find the 2d contours around each muscle. For simplicity I then resample the contours to 20 points: hopefully this should allow us to average in the map extracted from other confocal stacks in the future. We then save the lists of contours to use in the construction of the model.

In [7]:
# Map muscle names to their max-projection images for contour extraction.
# NOTE(review): the key 'iii4' points at iii24_img while every other cell
# calls this muscle 'iii24' - confirm which name the model should use.
mimgs = {'b1':b1_img,
         'b2':b2_img,
         'b3':b3_img,
         'i1':i1_img,
         'i2':i2_img,
         'iii1':iii1_img,
         'iii3':iii3_img,
         'iii4':iii24_img,
         'hg1':hg1_img,
         'hg2':hg2_img,
         'hg3':hg3_img,
         'hg4':hg4_img}
model_data = dict()
#construct model
from skimage import measure
from scipy.interpolate import griddata
figure(figsize(5,5))
imshow(sumimg)
for mkey in mimgs.keys():
    img = mimgs[mkey]
    # Take only the first (presumably largest) level-1 contour per muscle.
    contours = [measure.find_contours(img,1)[0]]
    for n, contour in enumerate(contours):
        clen = len(contour[:,0])
        # NOTE(review): np.linspace(clen,20) yields 50 samples descending
        # from clen to 20, not the 20 evenly spaced samples over the contour
        # that the prose describes - np.linspace(0,clen,20) looks intended.
        # The NaN stripping below discards the out-of-range samples.
        x = griddata(np.arange(clen),contour[:,0],np.linspace(clen,20))
        x = x[~isnan(x)]
        # Close the contour by repeating the first point.
        x = hstack((x,x[0]))
        y = griddata(np.arange(clen),contour[:,1],np.linspace(clen,20))
        y = y[~isnan(y)]
        y = hstack((y,y[0]))
        model_data[mkey] = vstack((y,x))
        plot(y, x,linewidth=2,color = 'r')
gca().set_xbound((0,1024))
gca().set_ybound((0,1024))
display(gcf());close()
In [8]:
class Basis(dict):
    """Dictionary holding a 2-D reference frame: origin 'p' and basis
    vectors 'a1', 'a2'.  Assigning either basis vector automatically
    (re)computes the forward transform 'A' (columns a1, a2) and its
    inverse 'A_inv', once both vectors are present."""

    def __setitem__(self, key, item):
        """Store `item`; refresh 'A'/'A_inv' whenever a basis vector changes."""
        dict.__setitem__(self, key, item)
        # Nothing to compute until both basis vectors exist (the original
        # implementation guarded this case with a try/except KeyError).
        if key in ('a1', 'a2') and 'a1' in self and 'a2' in self:
            mat = np.vstack((self['a1'], self['a2'])).T
            # Compute the inverse before storing either entry, so a singular
            # matrix leaves the dict without stale 'A'/'A_inv' values.
            mat_inv = np.linalg.inv(mat)
            dict.__setitem__(self, 'A', mat)
            dict.__setitem__(self, 'A_inv', mat_inv)
        
class GeometricModel(object):
    """Store a set of 2-D lines relative to a reference basis, so they can
    later be re-drawn in any other basis."""

    def __init__(self,lines,basis):
        """`lines` maps names to 2xN point arrays; `basis` supplies
        'A_inv' (inverse transform) and 'p' (origin)."""
        self.lines = lines
        self.basis = basis
        # Convert each line into basis ("barycentric") coordinates:
        # coords = A_inv . (points - p)
        self.barycentric = dict()
        for name in self.lines.keys():
            shifted = self.lines[name] - self.basis['p'][:, np.newaxis]
            self.barycentric[name] = np.dot(self.basis['A_inv'], shifted).T

    def coords_from_basis(self,basis):
        """Return the stored lines expressed in the frame of `basis`
        (a dict providing 'A' and 'p'), as a name -> 2xN array dict."""
        mapped = dict()
        for name in self.barycentric.keys():
            bary = self.barycentric[name]
            mapped[name] = np.dot(basis['A'], bary.T) + basis['p'][:, np.newaxis]
        return mapped

class ModelView(object):
    """Draws a GeometricModel in a given basis on the current pylab axes."""

    def __init__(self,model):
        self.model = model

    def plot(self,basis,**kwargs):
        """Plot the model's lines in white in the frame of `basis`, then
        draw the basis vectors as arrows (a1 green, a2 blue) from 'p'.
        Extra kwargs are forwarded to pylab's plot/arrow (note: 'color'
        and 'head_width' are overridden here)."""
        transformed = self.model.coords_from_basis(basis)
        kwargs['color'] = 'w'
        for segment in transformed.values():
            plot(segment[0,:],segment[1,:], **kwargs)
        origin = basis['p']
        kwargs['head_width'] = 20
        for vec, vec_color in ((basis['a1'], 'g'), (basis['a2'], 'b')):
            kwargs['color'] = vec_color
            arrow(origin[0],origin[1],vec[0],vec[1],**kwargs)

Now I construct a model and plot it in the reference frame of the confocal image: the position of the three bristles that form the reference frame are hard coded here.

In [9]:
figure(figsize = (10,10))
imfile = tifffile.TiffFile('im_overlay.tiff')
sumimg = imfile.asarray()

### Hard-coded endpoints of the two large setae (bristles) used as the
### reference frame for this confocal image: each is a 2x2 array of
### [start, end] points.
model_data['e1'] = np.array([[ 170.02788104,  326.71685254],
                             [ 380.98203222,  919.92627014]])
model_data['e2'] = array([[ 172.83333333,  332.83333333],
                          [ 551.5       ,  164.83333333]])
e1 = model_data['e1']
e2 = model_data['e2']
# Note: this rebinds `muscles` (previously an image array in cell In[5])
# to a dict of contour lines.
muscles = dict()

for key in model_data.keys():
    if not(key in ['e1','e2']):
        muscles[key] = model_data[key]
        
# Basis: origin at e2's start point; a1 along e2, a2 from e2's start to
# e1's tip.  NOTE(review): a2 mixes e1 and e2 endpoints - presumably both
# vectors share the common origin e2[0]; confirm this is intended.
confocal_basis = Basis()
confocal_basis['a1'] = e2[1]-e2[0]
confocal_basis['a2'] = e1[1]-e2[0]
confocal_basis['p'] = e2[0]

thorax = GeometricModel(muscles,confocal_basis)
thorax_view = ModelView(thorax)

imshow(sumimg)
import copy
thorax_view.plot(thorax.basis)
gca().axis('tight')
display(gcf());close()

Using the imageAnalysis.py program I manually fit the reference vectors to the GCaMP imaging stacks. With the extracted point correspondences the images from the GCaMP movies can then be warped back into the same frame as the confocal stack. This is done on the max projection images from a squadron of eight flies here.

In [10]:
# Warp each fly's GCaMP max projection into the confocal reference frame
# using the manually fit basis stored alongside each fly's data.
import db_access as dba
import flylib as flb
import cv2
import cPickle  # Python 2; `pickle` under Python 3
output_shape = (1024,1024)
figure(figsize = (8,16))
fly_db = dba.get_db()
swarm = flb.Squadron(fly_db,[244,243,242,241,240,239,238,237])
for i,fly in enumerate(swarm.flies):
    tiffdata = np.array(fly.fly_record['experiments'].values()[0]['tiff_data']['images'])
    # Manually fit basis for this fly, produced by imageAnalysis.py.
    pkname = fly.fly_path + '/basis_fits.cpkl'
    f = open(pkname)
    img_data = cPickle.load(f)
    f.close()
    test_basis = Basis()
    for key in ['A','p','a1','a2']:
        test_basis[key] = img_data[key]

    # Three point correspondences (tips of a1, origin p, tip of a2) define
    # the affine map from the imaging frame to the confocal frame.
    src_p0 = test_basis['a1'] + test_basis['p']
    src_p1 = test_basis['p']
    src_p2 = test_basis['a2'] + test_basis['p']
    dst_p0 = confocal_basis['a1'] + confocal_basis['p']
    dst_p1 = confocal_basis['p']
    dst_p2 = confocal_basis['a2'] + confocal_basis['p']
    maximg = np.max(tiffdata,axis=0)
    # 4x2 grid of subplots.  NOTE(review): `i/2` relies on Python 2 integer
    # division; use i//2 when porting to Python 3.
    col = mod(i,2)
    row = i/2
    subplot2grid((4,2),(row,col))
    A = cv2.getAffineTransform(np.float32([src_p0,src_p1,src_p2]),np.float32([dst_p0,dst_p1,dst_p2]))
    imshow(cv2.warpAffine(maximg.T,A,output_shape),cmap = cm.gray)
    thorax_view.plot(thorax.basis,alpha = 0.5)
    gca().set_ybound(0,1024)
    gca().set_xbound(0,1024)
    gca().set_xticks([])
    gca().set_yticks([])
display(gcf());close()

To perform ICA I take the first fly in the squadron and warp the movie into a scaled-down version of the confocal frame. The projection of the segmented muscles from the confocal stacks can then be used as pixel masks on the transformed image stack. An alternative would be to warp the masks into the Ca++ imaging frame; the approach I am taking now is conceptually simpler though no doubt computationally much slower. In the analysis these masks are used in two ways: 1) to reduce the size of the problem and 2) to provide spatial information.

In [11]:
# Warp the full movie of the first fly into a 4x-downsampled (256x256)
# version of the confocal frame.  This repeats the basis-loading logic of
# the previous cell for a single fly.
fly = swarm.flies[0]
tiffdata = np.array(fly.fly_record['experiments'].values()[0]['tiff_data']['images'])
pkname = fly.fly_path + '/basis_fits.cpkl'
f = open(pkname)
img_data = cPickle.load(f)
f.close()
test_basis = Basis()
for key in ['A','p','a1','a2']:
    test_basis[key] = img_data[key]

src_p0 = test_basis['a1'] + test_basis['p']
src_p1 = test_basis['p']
src_p2 = test_basis['a2'] + test_basis['p']
dst_p0 = confocal_basis['a1'] + confocal_basis['p']
dst_p1 = confocal_basis['p']
dst_p2 = confocal_basis['a2'] + confocal_basis['p']
maximg = np.max(tiffdata,axis=0)
A = cv2.getAffineTransform(np.float32([src_p0,src_p1,src_p2]),np.float32([dst_p0,dst_p1,dst_p2]))

# Pre-allocate for up to 35000 frames.  NOTE(review): if the movie has more
# frames this raises IndexError; if fewer, the tail stays zero - consider
# sizing from shape(tiffdata)[0].
warped_movie = np.zeros((35000,256,256),dtype = np.uint8)
for frame in np.arange(shape(tiffdata)[0]):
    warped_frame = cv2.warpAffine(tiffdata[frame,:,:].T,A,output_shape)
    # Two pyrDown calls: 1024x1024 -> 256x256.
    warped_movie[frame,:,:] = np.uint8(cv2.pyrDown(cv2.pyrDown(warped_frame)))
In [12]:
# define boolean masks for each muscle
# Boolean masks: any non-zero pixel in a muscle's max projection belongs
# to that muscle's region of interest.
b1_mask = b1_img >0
b2_mask = b2_img >0
b3_mask = b3_img >0

i1_mask = i1_img >0
i2_mask = i2_img >0

iii1_mask = iii1_img >0
iii3_mask = iii3_img >0
iii24_mask = iii24_img >0

hg1_mask = hg1_img >0
hg2_mask = hg2_img >0
hg3_mask = hg3_img >0
hg4_mask = hg4_img >0
In [13]:
times = np.array(fly.experiments.values()[0].exp_record['tiff_data']['axon_framebase']['times'])
#I am running the ICA on only the first 10000 frames of the Ca++ stream
def runICA(mask,n_components = 2):
    """Run FastICA on the masked pixels of the first 10000 warped frames.

    Parameters
    ----------
    mask : full-resolution boolean mask; it is pyrDown'd twice to match
        the 256x256 `warped_movie` (module-level global).
    n_components : number of independent components to extract.

    Returns
    -------
    S_ : (10000, n_components) array of reconstructed source signals.
    A_ : (n_pixels, n_components) estimated mixing matrix.
    """
    from sklearn.decomposition import FastICA, PCA
    # Downsample the mask to the warped movie's 256x256 resolution.
    mask = cv2.pyrDown(cv2.pyrDown(np.float64(mask))) >0
    mask_signal = warped_movie[:10000,mask]
    mask_signal = np.float64(mask_signal)
    ica = FastICA(n_components=n_components)
    # NOTE(review): the original computed a per-pixel normalised signal
    # (mask_signal / mask_signal.std(axis=0)) but never used it - ICA was
    # fit on the raw signal.  The dead assignment is removed here; if
    # normalisation was intended, pass the normalised array to fit_transform.
    S_ = ica.fit_transform(mask_signal)  # Reconstruct signals
    A_ = ica.mixing_  # Get estimated mixing matrix
    return S_,A_

def plot_ICA(mask,S_,A_,n_components = 2):
    """Plot an ICA decomposition: one row per component, with the mixing
    column rendered as a spatial heat map (left) and the component time
    series (right, x-axis shared across rows).

    Uses module-level `thorax`, `thorax_view`, `times`, and the `copy`
    module imported earlier in the notebook.
    """
    # Downsample the mask to the 256x256 warped-movie resolution.
    mask = cv2.pyrDown(cv2.pyrDown(np.float64(mask))) >0
    figure(figsize(12,1.5*n_components))
    for i in range(n_components):
        col = 0 #mod(i,2)
        row = i
        subplot2grid((n_components,4),(row,col)) 
        # Scatter the i-th mixing column back into anatomical space,
        # shifted so the global minimum maps to zero.
        tst_img = np.zeros((256,256))
        tst_img[mask] = A_[:,i] - np.min(A_)
        imshow(tst_img,vmin= 0)#,vmax = 100)
        # Shallow-copy the reference frame and shrink it 4x to match the
        # twice-pyrDown'd image.
        little_frame = copy.copy(thorax.basis)
        little_frame['p'] = thorax.basis['p']/4
        little_frame['a1'] = thorax.basis['a1']/4
        little_frame['a2'] = thorax.basis['a2']/4
        thorax_view.plot(little_frame,lw = 0.3)
        gca().set_xticks([])
        gca().set_yticks([])
        gca().set_xbound((0,256))
        gca().set_ybound((0,256))
        # First row creates the time-series axes; later rows share its
        # x-axis (`ax` is carried over from the previous loop iteration).
        if i == 0:
            subplot2grid((n_components,4),(row,col+1),colspan = 3)
        else:
            subplot2grid((n_components,4),(row,col+1),colspan = 3,sharex = ax)
        ax = gca()
        plot(times[:10000],S_[:,i])

First I performed ICA on a mask defined as the union of all the muscles. The 12 columns of the mixing matrix are reshaped into anatomical space and shown as a heat map on the left. The corresponding component is shown to the right. Time is in sec.

In [14]:
# ICA over the union of all twelve muscle masks, one component per muscle.
nc = 12
all_muscle_mask = b1_mask | b2_mask \
              | b3_mask | i1_mask \
              | i2_mask | iii1_mask \
              | iii3_mask | iii24_mask \
              | hg1_mask | hg2_mask \
              | hg3_mask | hg4_mask
                
S_,A_ = runICA(all_muscle_mask,n_components = nc)
plot_ICA(all_muscle_mask,S_,A_,n_components = nc);display(gcf());close()
/home/flyranch/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/sklearn/decomposition/fastica_.py:110: UserWarning: FastICA did not converge. You might want to increase the number of iterations.
  ' to increase the number of iterations.')

This approach seems like it is useful in the sense that it identifies networks of activity, but it is difficult to interpret in terms of isolating the signal from individual muscles. This is likely due to the fact that optimization of the mixing matrix is not constrained by any spatial information. I have found several papers that describe ICA with spatial constraints, but implementing these requires that I roll my own ICA optimization algorithm - and this would take some time. As a first pass I took an ad-hoc approach to applying spatial constraints: first by breaking the analysis up into the anterior and posterior clusters of muscles, then looking at the components of each muscle one-by-one.

In [15]:
#ICA restricted to the anterior cluster of muscles
nc = 4
anterior_muscle_mask = b1_mask | b2_mask \
                       |b3_mask | i1_mask
S_,A_ = runICA(anterior_muscle_mask,n_components = nc)
plot_ICA(anterior_muscle_mask,S_,A_,n_components = nc);display(gcf());close()
In [16]:
#ICA restricted to the posterior cluster of muscles
# NOTE(review): iii1_mask and i2_mask are not part of this union - confirm
# whether their exclusion from the posterior cluster is intentional.
nc = 6
posterior_bool_mask =  iii3_mask | iii24_mask \
              | hg1_mask | hg2_mask \
              | hg3_mask | hg4_mask
S_,A_ = runICA(posterior_bool_mask,n_components = nc)
plot_ICA(posterior_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [17]:
# Per-muscle ICA: three components per individual muscle mask.
# NOTE(review): these ten cells are identical up to the mask name and could
# be replaced by a loop over the mask dictionary.
nc = 3
b2_bool_mask =  b2_mask 
S_,A_ = runICA(b2_bool_mask,n_components = nc)
plot_ICA(b2_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [18]:
nc = 3
i1_bool_mask =  i1_mask
S_,A_ = runICA(i1_bool_mask,n_components = nc)
plot_ICA(i1_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [19]:
nc = 3
b3_bool_mask =  b3_mask
S_,A_ = runICA(b3_bool_mask,n_components = nc)
plot_ICA(b3_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [20]:
nc = 3
b1_bool_mask =  b1_mask
S_,A_ = runICA(b1_bool_mask,n_components = nc)
plot_ICA(b1_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [21]:
nc = 3
iii3_bool_mask =  iii3_mask 
S_,A_ = runICA(iii3_bool_mask,n_components = nc)
plot_ICA(iii3_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [22]:
nc = 3
iii24_bool_mask =  iii24_mask 
S_,A_ = runICA(iii24_bool_mask,n_components = nc)
plot_ICA(iii24_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [23]:
nc = 3
hg1_bool_mask =  hg1_mask 
S_,A_ = runICA(hg1_bool_mask,n_components = nc)
plot_ICA(hg1_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [24]:
nc = 3
hg2_bool_mask =  hg2_mask 
S_,A_ = runICA(hg2_bool_mask,n_components = nc)
plot_ICA(hg2_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [25]:
nc = 3
hg3_bool_mask =  hg3_mask 
S_,A_ = runICA(hg3_bool_mask,n_components = nc)
plot_ICA(hg3_bool_mask,S_,A_,n_components = nc);display(gcf());close()
In [26]:
nc = 3
hg4_bool_mask =  hg4_mask 
S_,A_ = runICA(hg4_bool_mask,n_components = nc)
plot_ICA(hg4_bool_mask,S_,A_,n_components = nc);display(gcf());close()

It is also possible to define masks that attempt to exclude pixels from overlapping regions.

In [27]:
nc = 3
# NOTE(review): the name says "b2 only" but the mask starts from i1_mask
# and excludes the b muscles - presumably either the name or the base mask
# is wrong (e.g. intended b2_mask & ~(i1_mask | b1_mask | b3_mask)); verify.
b2_only_boolmask =  i1_mask & ~(b2_mask | b1_mask |b3_mask)
S_,A_ = runICA(b2_only_boolmask,n_components = nc)
plot_ICA(b2_only_boolmask,S_,A_,n_components = nc);display(gcf());close()
In [0]:
 
In [0]:
 
In [64]: